GloVe 2

Here we create a feature space by averaging the word vectors in each paragraph. We then train a random forest classifier on the resulting data.

Without using k-means (see Word2Vec 2) to improve the vectors, we get a humbling accuracy score of 0.79216 with this model.


In [1]:
import numpy as np  # Make sure that numpy is imported
from glove import Glove, Corpus
import pandas as pd
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
from sklearn.cluster import KMeans
import time

In [2]:
model = Glove.load('glove.model')

# helper functions for working with the glove object (model)

def glove_getitem(model, word):
    # Return the GloVe vector for a word
    try:
        word_idx = model.dictionary[word]
    except KeyError:
        raise KeyError("Word '%s' not in dictionary" % word)

    return model.word_vectors[word_idx]

def glove_contains(model, word):
    # Check whether a word is in the model's vocabulary
    return word in model.dictionary
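
As a quick sanity check of these helpers (a hedged sketch; 'movie' is an assumed vocabulary word, but any frequent token from the training corpus should work):

if glove_contains(model, 'movie'):
    print glove_getitem(model, 'movie').shape  # (100,) for this model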

In [3]:
train = pd.read_csv( "labeledTrainData.tsv", header=0, delimiter="\t", quoting=3 )
test = pd.read_csv( "testData.tsv", header=0, delimiter="\t", quoting=3 )
num_features = 100

def review_to_wordlist( review, remove_stopwords=False ):
    # Function to convert a document to a sequence of words,
    # optionally removing stop words.  Returns a list of words.
    #
    # 1. Remove HTML
    review_text = BeautifulSoup(review).get_text()
    #  
    # 2. Remove non-letters
    review_text = re.sub("[^a-zA-Z]"," ", review_text)
    #
    # 3. Convert words to lower case and split them
    words = review_text.lower().split()
    #
    # 4. Optionally remove stop words (false by default)
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    #
    # 5. Return a list of words
    return(words)
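
For example, with stop word removal on, a raw snippet with markup boils down to content words (the sample string is made up):

print review_to_wordlist("<br />This movie was not that great, was it?", remove_stopwords=True)
# expected: ['movie', 'great']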

In [4]:
def makeFeatureVec(words, model, num_features):
    # Function to average all of the word vectors in a given
    # paragraph
    #
    # Pre-initialize an empty numpy array (for speed)
    featureVec = np.zeros((num_features,),dtype="float32")
    #
    nwords = 0.

    #
    # Loop over each word in the review and, if it is in the model's
    # vocabulary, add its feature vector to the total
    for word in words:
        if glove_contains(model, word):
            nwords = nwords + 1.
            featureVec = np.add(featureVec, glove_getitem(model, word))

    #
    # Divide the total by the number of words to get the average;
    # guard against empty reviews to avoid dividing by zero
    if nwords > 0.:
        featureVec = np.divide(featureVec, nwords)
    return featureVec


def getAvgFeatureVecs(reviews, model, num_features):
    # Given a set of reviews (each one a list of words), calculate 
    # the average feature vector for each one and return a 2D numpy array 
    # 
    # Initialize a counter
    counter = 0
    # 
    # Preallocate a 2D numpy array, for speed
    reviewFeatureVecs = np.zeros((len(reviews),num_features),dtype="float32")
    # 
    # Loop through the reviews
    for review in reviews:
       #
       # Print a status message every 1000th review
       if counter % 1000 == 0:
           print "Review %d of %d" % (counter, len(reviews))
       # 
       # Call the function (defined above) that makes average feature vectors
       reviewFeatureVecs[counter] = makeFeatureVec(review, model, \
           num_features)
       #
       # Increment the counter
       counter = counter + 1
    return reviewFeatureVecs
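
A toy run (hedged; both tokens are assumed to be in the GloVe vocabulary) should come back as a (2, 100) array:

toy_vecs = getAvgFeatureVecs([['good', 'movie'], ['bad', 'movie']], model, num_features)
print toy_vecs.shape  # (2, 100)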

In [5]:
clean_train_reviews = []

for review in train["review"]:
    clean_train_reviews.append( review_to_wordlist( review, \
        remove_stopwords=True ))

trainDataVecs = getAvgFeatureVecs( clean_train_reviews, model, num_features )

print "Creating average feature vecs for test reviews"
clean_test_reviews = []
for review in test["review"]:
    clean_test_reviews.append( review_to_wordlist( review, \
        remove_stopwords=True ))

testDataVecs = getAvgFeatureVecs( clean_test_reviews, model, num_features )


Review 0 of 25000
Review 1000 of 25000
...
Review 24000 of 25000
Creating average feature vecs for test reviews
Review 0 of 25000
Review 1000 of 25000
...
Review 24000 of 25000

In [7]:
# Fit a random forest to the training data, using 100 trees
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier( n_estimators = 100 )

print "Fitting a random forest to labeled training data..."
forest = forest.fit( trainDataVecs, train["sentiment"] )

# Test & extract results 
result = forest.predict( testDataVecs )

# Write the test results 
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
output.to_csv( "GloVe_AverageVectors.csv", index=False, quoting=3 )


Fitting a random forest to labeled training data...
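
The 0.79216 mentioned above is the Kaggle submission score; a rough local estimate can be had before submitting with cross-validation (a sketch; cv=3 is an arbitrary choice):

from sklearn.cross_validation import cross_val_score
scores = cross_val_score(forest, trainDataVecs, train["sentiment"], cv=3)
print "Mean CV accuracy: %.4f" % scores.mean()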

Clustering the word vectors with k-means would be another shot at a better result, but as I have to take my MacBook to work, I killed it after 11 hours.


In [8]:
start = time.time() # Start time

# Set "k" (num_clusters) to be 1/5th of the vocabulary size, or an
# average of 5 words per cluster
word_vectors = model.word_vectors
num_clusters = word_vectors.shape[0] / 5

# Initialize a k-means object and use it to extract centroids
kmeans_clustering = KMeans( n_clusters = num_clusters )
idx = kmeans_clustering.fit_predict( word_vectors )

# Get the end time and print how long the process took
end = time.time()
elapsed = end - start
print "Time taken for K Means clustering: ", elapsed, "seconds."


---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-8-11cc568db8d8> in <module>()
      8 # Initalize a k-means object and use it to extract centroids
      9 kmeans_clustering = KMeans( n_clusters = num_clusters )
---> 10 idx = kmeans_clustering.fit_predict( word_vectors )
     11 
     12 # Get the end time and print how long the process took

/usr/local/lib/python2.7/site-packages/sklearn/cluster/k_means_.pyc in fit_predict(self, X, y)
    801         predict(X).
    802         """
--> 803         return self.fit(X).labels_
    804 
    805     def fit_transform(self, X, y=None):

/usr/local/lib/python2.7/site-packages/sklearn/cluster/k_means_.pyc in fit(self, X, y)
    792                 precompute_distances=self.precompute_distances,
    793                 tol=self.tol, random_state=random_state, copy_x=self.copy_x,
--> 794                 n_jobs=self.n_jobs)
    795         return self
    796 

/usr/local/lib/python2.7/site-packages/sklearn/cluster/k_means_.pyc in k_means(X, n_clusters, init, precompute_distances, n_init, max_iter, verbose, tol, random_state, copy_x, n_jobs, return_n_iter)
    299                 X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
    300                 precompute_distances=precompute_distances, tol=tol,
--> 301                 x_squared_norms=x_squared_norms, random_state=random_state)
    302             # determine if these results are the best so far
    303             if best_inertia is None or inertia < best_inertia:

/usr/local/lib/python2.7/site-packages/sklearn/cluster/k_means_.pyc in _kmeans_single(X, n_clusters, x_squared_norms, max_iter, init, verbose, random_state, tol, precompute_distances)
    422             _labels_inertia(X, x_squared_norms, centers,
    423                             precompute_distances=precompute_distances,
--> 424                             distances=distances)
    425 
    426         # computation of the means is also called the M-step of EM

/usr/local/lib/python2.7/site-packages/sklearn/cluster/k_means_.pyc in _labels_inertia(X, x_squared_norms, centers, precompute_distances, distances)
    542                                                     centers, distances)
    543         inertia = _k_means._assign_labels_array(
--> 544             X, x_squared_norms, centers, labels, distances=distances)
    545     return labels, inertia
    546 

KeyboardInterrupt: 
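
If I were to retry the clustering, scikit-learn's MiniBatchKMeans would likely finish in minutes rather than hours on the same vocabulary (a sketch with the same num_clusters; batch_size=1000 is an untuned assumption):

from sklearn.cluster import MiniBatchKMeans

start = time.time()
kmeans_clustering = MiniBatchKMeans( n_clusters = num_clusters, batch_size = 1000 )
idx = kmeans_clustering.fit_predict( word_vectors )
print "Time taken for MiniBatchKMeans clustering: ", time.time() - start, "seconds."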
